
Refactor kube.ToSpecGen parameters to struct
Create kube.CtrSpecGenOptions and document parameters.
Follow-up on containers#8792 (comment)

Signed-off-by: Benedikt Ziemons <[email protected]>
bziemons committed Dec 23, 2020
1 parent 9ac5ed1 commit 8566a53
Showing 2 changed files with 66 additions and 31 deletions.
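The gist of the change, condensed from the play.go call site in the diff below (not standalone code: ctx, container, newImage, and the other locals come from playKubePod):

// Before: one call with a long list of positional parameters.
specGen, err := kube.ToSpecGen(ctx, container, container.Image, newImage, volumes, pod.ID(), podName, podInfraID, configMaps, seccompPaths, ctrRestartPolicy, p.NetNS.IsHost())

// After: a documented kube.CtrSpecGenOptions struct with named fields,
// passed by pointer to kube.ToSpecGen.
specgenOpts := kube.CtrSpecGenOptions{
	Container:     container,
	Image:         newImage,
	Volumes:       volumes,
	PodID:         pod.ID(),
	PodName:       podName,
	PodInfraID:    podInfraID,
	ConfigMaps:    configMaps,
	SeccompPaths:  seccompPaths,
	RestartPolicy: ctrRestartPolicy,
	NetNSIsHost:   p.NetNS.IsHost(),
}
specGen, err := kube.ToSpecGen(ctx, &specgenOpts)

Note that the separate image-ID argument disappears: ToSpecGen now derives the image name for specgen.NewSpecGenerator from opts.Container.Image and inspects the pulled image via opts.Image.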
14 changes: 13 additions & 1 deletion pkg/domain/infra/abi/play.go
@@ -226,7 +226,19 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
return nil, err
}

- specGen, err := kube.ToSpecGen(ctx, container, container.Image, newImage, volumes, pod.ID(), podName, podInfraID, configMaps, seccompPaths, ctrRestartPolicy, p.NetNS.IsHost())
+ specgenOpts := kube.CtrSpecGenOptions{
+ Container: container,
+ Image: newImage,
+ Volumes: volumes,
+ PodID: pod.ID(),
+ PodName: podName,
+ PodInfraID: podInfraID,
+ ConfigMaps: configMaps,
+ SeccompPaths: seccompPaths,
+ RestartPolicy: ctrRestartPolicy,
+ NetNSIsHost: p.NetNS.IsHost(),
+ }
+ specGen, err := kube.ToSpecGen(ctx, &specgenOpts)
if err != nil {
return nil, err
}
83 changes: 53 additions & 30 deletions pkg/specgen/generate/kube/kube.go
@@ -47,30 +47,53 @@ func ToPodGen(ctx context.Context, podName string, podYAML *v1.PodTemplateSpec)
return p, nil
}

- func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newImage *image.Image, volumes map[string]*KubeVolume, podID, podName, infraID string, configMaps []v1.ConfigMap, seccompPaths *KubeSeccompPaths, restartPolicy string, hostNet bool) (*specgen.SpecGenerator, error) {
- s := specgen.NewSpecGenerator(iid, false)
+ type CtrSpecGenOptions struct {
+ // Container as read from the pod yaml
+ Container v1.Container
+ // Image available to use (pulled or found local)
+ Image *image.Image
+ // Volumes for all containers
+ Volumes map[string]*KubeVolume
+ // PodID of the parent pod
+ PodID string
+ // PodName of the parent pod
+ PodName string
+ // PodInfraID as the infrastructure container id
+ PodInfraID string
+ // ConfigMaps the configuration maps for environment variables
+ ConfigMaps []v1.ConfigMap
+ // SeccompPaths for finding the seccomp profile path
+ SeccompPaths *KubeSeccompPaths
+ // RestartPolicy defines the restart policy of the container
+ RestartPolicy string
+ // NetNSIsHost tells the container to use the host netns
+ NetNSIsHost bool
+ }
+
+ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGenerator, error) {
+ s := specgen.NewSpecGenerator(opts.Container.Image, false)

- // podName should be non-empty for Deployment objects to be able to create
+ // pod name should be non-empty for Deployment objects to be able to create
// multiple pods having containers with unique names
- if len(podName) < 1 {
- return nil, errors.Errorf("kubeContainerToCreateConfig got empty podName")
+ if len(opts.PodName) < 1 {
+ return nil, errors.Errorf("got empty pod name on container creation when playing kube")
}

- s.Name = fmt.Sprintf("%s-%s", podName, containerYAML.Name)
+ s.Name = fmt.Sprintf("%s-%s", opts.PodName, opts.Container.Name)

- s.Terminal = containerYAML.TTY
+ s.Terminal = opts.Container.TTY

- s.Pod = podID
+ s.Pod = opts.PodID

- setupSecurityContext(s, containerYAML)
+ setupSecurityContext(s, opts.Container)

// Since we prefix the container name with pod name to work-around the uniqueness requirement,
// the seccomp profile should reference the actual container name from the YAML
// but apply to the containers with the prefixed name
- s.SeccompProfilePath = seccompPaths.FindForContainer(containerYAML.Name)
+ s.SeccompProfilePath = opts.SeccompPaths.FindForContainer(opts.Container.Name)

s.ResourceLimits = &spec.LinuxResources{}
- milliCPU, err := quantityToInt64(containerYAML.Resources.Limits.Cpu())
+ milliCPU, err := quantityToInt64(opts.Container.Resources.Limits.Cpu())
if err != nil {
return nil, errors.Wrap(err, "Failed to set CPU quota")
}
@@ -82,12 +105,12 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
}
}

- limit, err := quantityToInt64(containerYAML.Resources.Limits.Memory())
+ limit, err := quantityToInt64(opts.Container.Resources.Limits.Memory())
if err != nil {
return nil, errors.Wrap(err, "Failed to set memory limit")
}

- memoryRes, err := quantityToInt64(containerYAML.Resources.Requests.Memory())
+ memoryRes, err := quantityToInt64(opts.Container.Resources.Requests.Memory())
if err != nil {
return nil, errors.Wrap(err, "Failed to set memory reservation")
}
@@ -107,7 +130,7 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
// TODO: We don't understand why specgen does not take of this, but
// integration tests clearly pointed out that it was required.
s.Command = []string{}
- imageData, err := newImage.Inspect(ctx)
+ imageData, err := opts.Image.Inspect(ctx)
if err != nil {
return nil, err
}
@@ -134,26 +157,26 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
}
}
// If only the yaml.Command is specified, set it as the entrypoint and drop the image Cmd
- if len(containerYAML.Command) != 0 {
- entrypoint = containerYAML.Command
+ if len(opts.Container.Command) != 0 {
+ entrypoint = opts.Container.Command
cmd = []string{}
}
// Only override the cmd field if yaml.Args is specified
// Keep the image entrypoint, or the yaml.command if specified
- if len(containerYAML.Args) != 0 {
- cmd = containerYAML.Args
+ if len(opts.Container.Args) != 0 {
+ cmd = opts.Container.Args
}

s.Command = append(entrypoint, cmd...)
// FIXME,
// we are currently ignoring imageData.Config.ExposedPorts
- if containerYAML.WorkingDir != "" {
- s.WorkDir = containerYAML.WorkingDir
+ if opts.Container.WorkingDir != "" {
+ s.WorkDir = opts.Container.WorkingDir
}

annotations := make(map[string]string)
- if infraID != "" {
- annotations[ann.SandboxID] = infraID
+ if opts.PodInfraID != "" {
+ annotations[ann.SandboxID] = opts.PodInfraID
annotations[ann.ContainerType] = ann.ContainerTypeContainer
}
s.Annotations = annotations
@@ -165,22 +188,22 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
envs[keyval[0]] = keyval[1]
}

- for _, env := range containerYAML.Env {
- value := envVarValue(env, configMaps)
+ for _, env := range opts.Container.Env {
+ value := envVarValue(env, opts.ConfigMaps)

envs[env.Name] = value
}
- for _, envFrom := range containerYAML.EnvFrom {
- cmEnvs := envVarsFromConfigMap(envFrom, configMaps)
+ for _, envFrom := range opts.Container.EnvFrom {
+ cmEnvs := envVarsFromConfigMap(envFrom, opts.ConfigMaps)

for k, v := range cmEnvs {
envs[k] = v
}
}
s.Env = envs

- for _, volume := range containerYAML.VolumeMounts {
- volumeSource, exists := volumes[volume.Name]
+ for _, volume := range opts.Container.VolumeMounts {
+ volumeSource, exists := opts.Volumes[volume.Name]
if !exists {
return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
}
@@ -212,9 +235,9 @@ func ToSpecGen(ctx context.Context, containerYAML v1.Container, iid string, newI
}
}

- s.RestartPolicy = restartPolicy
+ s.RestartPolicy = opts.RestartPolicy

- if hostNet {
+ if opts.NetNSIsHost {
s.NetNS.NSMode = specgen.Host
}

